Migrate data from a self-managed Qdrant cluster to AnalyticDB for PostgreSQL
Qdrant is a vector similarity search engine designed to store, search, and manage vectors. This topic describes how to use Python to migrate collection data from a self-managed Qdrant cluster to an AnalyticDB for PostgreSQL instance.
Prerequisites
A Qdrant cluster is created.
A Python environment is installed. Python 3.8 or later is recommended.
The required Python libraries are installed.
pip install psycopg2
pip install qdrant-client==1.6.0
pip install pyaml
pip install tqdm
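Before you run the export, you can optionally verify that the libraries are installed correctly and that the Qdrant cluster is reachable. The following is a minimal sketch; the host and port values are assumptions and must be changed to match your cluster.

# Optional connectivity check. Assumes Qdrant listens on localhost:6333;
# replace these values with the actual address of your cluster.
from qdrant_client import QdrantClient

client = QdrantClient(host="localhost", port=6333)
# Listing collections confirms that the client can reach the cluster.
for collection in client.get_collections().collections:
    print(collection.name)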
Migration procedure
Step 1: Export data from Qdrant
Prepare the export script export.py and its configuration file qdrant2csv.yaml, and create an output directory. In this topic, output is used as an example. The export script export.py is as follows.

import yaml
import json
from qdrant_client import QdrantClient
import os
from enum import IntEnum
from tqdm import tqdm

with open("./qdrant2csv.yaml", "r") as f:
    config = yaml.safe_load(f)
print("configuration:")
print(config)
qdrant_config = config["qdrant"]


class DataType(IntEnum):
    ID = 1
    FLOAT_VECTOR = 2
    JSON = 3


def data_convert_to_str(data, dtype, delimiter):
    if dtype == DataType.ID:
        return str(data)
    elif dtype == DataType.FLOAT_VECTOR:
        return "{" + ", ".join(str(x) for x in data) + "}"
    elif dtype == DataType.JSON:
        # escape the delimiter and double quotes inside the JSON payload
        return str(data).replace(delimiter, f"\\{delimiter}").replace("\"", "\\\"")
    raise Exception(f"Unsupported DataType {dtype}")


def csv_write_rows(datum, fd, fields_types, delimiter="|"):
    for data in datum:
        for i in range(len(data)):
            data[i] = data_convert_to_str(data[i], fields_types[i], delimiter)
        fd.write(delimiter.join(data) + "\n")


def csv_write_header(headers, fd, delimiter="|"):
    fd.write(delimiter.join(headers) + "\n")


def dump_collection(collection_name: str):
    results = []
    file_cnt = 0
    print("connecting to qdrant...")
    client = QdrantClient(**qdrant_config)

    export_config = config["export"]
    tmp_path = os.path.join(export_config["output_path"], collection_name)
    if not os.path.exists(tmp_path):
        os.mkdir(tmp_path)

    # fetch info of collection
    fields_meta_list = ["id bigint"]
    fields_types = [DataType.ID]
    headers = ["id"]
    collection = client.get_collection(collection_name)
    total_num = collection.points_count
    if isinstance(collection.config.params.vectors, dict):
        # multi vectors
        for vec_name in collection.config.params.vectors.keys():
            fields_types.append(DataType.FLOAT_VECTOR)
            fields_meta_list.append(f"{vec_name} real[]")
            headers.append(vec_name)
    else:
        # single vector
        fields_types.append(DataType.FLOAT_VECTOR)
        fields_meta_list.append("vector real[]")
        headers.append("vector")
    fields_types.append(DataType.JSON)
    fields_meta_list.append("payload json")
    headers.append("payload")

    fields_meta_str = ','.join(fields_meta_list)
    create_table_sql = f"CREATE TABLE {collection_name} " \
                       f" ({fields_meta_str});"
    with open(os.path.join(export_config["output_path"], collection_name,
                           "create_table.sql"), "w") as f_d:
        f_d.write(create_table_sql)
    print(create_table_sql)

    def write_to_csv_file(col_names, data):
        if len(results) == 0:
            return
        nonlocal file_cnt
        assert file_cnt <= 1e9
        output_file_name = os.path.join(export_config["output_path"], collection_name,
                                        f"{str(file_cnt).zfill(10)}.csv")
        with open(output_file_name, "w", newline="") as csv_file:
            # write header
            csv_write_header(col_names, csv_file)
            # write data
            csv_write_rows(data, csv_file, fields_types)
        file_cnt += 1
        results.clear()

    offset_id = None
    with tqdm(total=total_num, bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}") as pbar:
        while True:
            res = client.scroll(collection_name=collection_name,
                                limit=1000,
                                offset=offset_id,
                                with_payload=True,
                                with_vectors=True)
            records = res[0]
            for record in records:
                # append id
                record_list = [record.id]
                # append vectors
                if isinstance(record.vector, dict):
                    # multi vector
                    for vector_name in headers[1:-1]:
                        record_list.append(record.vector[vector_name])
                else:
                    # single vector
                    record_list.append(record.vector)
                # append payload
                record_list.append(json.dumps(record.payload, ensure_ascii=False))
                results.append(record_list)
                if len(results) >= export_config["max_line_in_file"]:
                    write_to_csv_file(headers, data=results)
                pbar.update(1)
            if len(res) == 0 or len(res[0]) == 0 or res[1] is None:
                # finished
                break
            else:
                offset_id = res[1]
    write_to_csv_file(headers, data=results)


for name in config["export"]["collections"]:
    dump_collection(name)
The export configuration file qdrant2csv.yaml is as follows.

qdrant:                    # Connection settings for Qdrant
  host: 'localhost'        # Host address of Qdrant
  port: 6333               # Qdrant HTTP port. Default: 6333.
  grpc_port: 6334          # Port of the gRPC interface. Default: 6334.
  api_key: ''              # API key used for authentication in Qdrant Cloud
  url: ''                  # Either a host name or a string in the form "Optional[scheme], host, Optional[port], Optional[prefix]"
  location: ''             # If set to 'memory', connect in in-memory mode; any other non-empty string is treated the same as the url setting; if empty, the connection uses host and port
export:
  collections:
    - 'test_collection'
    - 'multi'              # List all collections that need to be exported
  max_line_in_file: 40000  # Maximum number of rows per output file
  output_path: './output'  # Export destination directory
Place the export script export.py, the export configuration file qdrant2csv.yaml, and the output directory output in the same directory. The directory hierarchy is as follows.

├── export.py
├── qdrant2csv.yaml
└── output
Modify the settings in qdrant2csv.yaml based on the information about your Qdrant cluster. Then run the Python script and check the output.
python export.py
The output is as follows.
.
├── export.py
├── qdrant2csv.yaml
└── output
    ├── test_collection
    │   ├── 0000000000.csv
    │   ├── 0000000001.csv
    │   ├── 0000000002.csv
    │   └── create_table.sql
    └── multi
        ├── 0000000000.csv
        └── create_table.sql
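Optionally, you can spot-check an exported file before importing it. The following sketch prints the header and the first data row of one CSV file; the file path is taken from the example output above and should be replaced with one of your own files.

# Optional spot check of one exported file.
with open("./output/test_collection/0000000000.csv", "r") as f:
    header = f.readline().rstrip("\n")
    first_row = f.readline().rstrip("\n")

print("columns:", header.split("|"))  # the export uses '|' as the delimiter
print("first row:", first_row[:200])  # truncate long vector values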
Step 2: Import data into the AnalyticDB for PostgreSQL vector database
Prepare the import script import.py, the import configuration file csv2adbpg.yaml, and the data to be imported in a directory named data (that is, the output directory obtained in the export step). The import script import.py is as follows.

import psycopg2
import yaml
import glob
import os

if __name__ == "__main__":
    with open('csv2adbpg.yaml', 'r') as config_file:
        config = yaml.safe_load(config_file)
    print("current config:" + str(config))

    db_host = config['database']['host']
    db_port = config['database']['port']
    db_name = config['database']['name']
    schema_name = config['database']['schema']
    db_user = config['database']['user']
    db_password = config['database']['password']
    data_path = config['data_path']

    conn = psycopg2.connect(
        host=db_host,
        port=db_port,
        database=db_name,
        user=db_user,
        password=db_password,
        options=f'-c search_path={schema_name},public'
    )
    cur = conn.cursor()

    # check whether the schema exists and create it if it does not
    cur.execute("SELECT schema_name FROM information_schema.schemata WHERE schema_name = %s",
                (schema_name,))
    existing_schema = cur.fetchone()
    if existing_schema:
        print(f"Schema {schema_name} already exists.")
    else:
        cur.execute(f"CREATE SCHEMA {schema_name}")
        print(f"Created schema: {schema_name}")

    for table_name in os.listdir(data_path):
        table_folder = os.path.join(data_path, table_name)
        print(f"Begin Process table: {table_name}")
        if os.path.isdir(table_folder):
            # create the table from the exported DDL
            create_table_file = os.path.join(table_folder, 'create_table.sql')
            with open(create_table_file, 'r') as file:
                create_table_sql = file.read()
            try:
                cur.execute(create_table_sql)
            except psycopg2.errors.DuplicateTable as e:
                print(e)
                conn.rollback()
                continue
            print(f"Created table: {table_name}")

            # load every CSV file of the table through COPY
            cnt = 0
            csv_files = glob.glob(os.path.join(table_folder, '*.csv'))
            for csv_file in csv_files:
                with open(csv_file, 'r') as file:
                    copy_command = f"COPY {table_name} FROM STDIN DELIMITER '|' HEADER"
                    cur.copy_expert(copy_command, file)
                cnt += 1
                print(f"Imported data from: {csv_file} | {cnt}/{len(csv_files)} file(s) Done")
            conn.commit()
            print(f"Finished import table: {table_name}")
        print('#' * 60)

    cur.close()
    conn.close()
The import configuration file csv2adbpg.yaml is as follows.

database:
  host: "192.16.XX.XX"     # Public endpoint of the AnalyticDB for PostgreSQL instance
  port: 5432               # Port number of the AnalyticDB for PostgreSQL instance
  name: "vector_database"  # Name of the destination database
  user: "username"         # Database account of the AnalyticDB for PostgreSQL instance
  password: ""             # Password of the database account
  schema: "public"         # Destination schema name; created automatically if it does not exist
data_path: "./data"        # Source directory of the data to be imported
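Before you run the full import, you can optionally confirm that the instance is reachable with the configured account. The following is a minimal sketch that reads csv2adbpg.yaml and opens a test connection; it assumes the file is in the current directory.

# Optional connection test using the settings from csv2adbpg.yaml.
import psycopg2
import yaml

with open("csv2adbpg.yaml", "r") as f:
    db = yaml.safe_load(f)["database"]

conn = psycopg2.connect(host=db["host"], port=db["port"],
                        database=db["name"], user=db["user"],
                        password=db["password"])
with conn.cursor() as cur:
    cur.execute("SELECT version();")
    print(cur.fetchone()[0])
conn.close()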
Place the import script import.py, the import configuration file csv2adbpg.yaml, and the data directory data in the same directory. The directory hierarchy is as follows.

.
├── csv2adbpg.yaml
├── data
│   ├── test_collection
│   │   ├── 0000000000.csv
│   │   ├── 0000000001.csv
│   │   ├── 0000000002.csv
│   │   └── create_table.sql
│   └── multi
│       ├── 0000000000.csv
│       └── create_table.sql
└── import.py
Modify the settings in csv2adbpg.yaml based on the information about your AnalyticDB for PostgreSQL instance. Then run the Python script.
python import.py
Check whether the data is imported into the AnalyticDB for PostgreSQL vector database as expected, for example with the row-count sketch below.
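One simple check is to compare the row count of each imported table with the points_count value that Qdrant reported during the export. The following is a minimal sketch, assuming the connection settings from csv2adbpg.yaml and the example collection names used in this topic.

# Count the rows of each imported table. The table names below are the
# example collections from this topic; replace them with your own.
import psycopg2
import yaml

with open("csv2adbpg.yaml", "r") as f:
    cfg = yaml.safe_load(f)["database"]

conn = psycopg2.connect(host=cfg["host"], port=cfg["port"],
                        database=cfg["name"], user=cfg["user"],
                        password=cfg["password"],
                        options=f"-c search_path={cfg['schema']},public")
with conn.cursor() as cur:
    for table in ("test_collection", "multi"):
        cur.execute(f"SELECT count(*) FROM {table};")
        print(table, cur.fetchone()[0])
conn.close()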
Rebuild the required indexes. For more information, see Create vector indexes.
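The exact CREATE INDEX statement depends on your instance version and vector dimension, so the linked topic is the authoritative reference. Purely as an illustration, a sketch that rebuilds a vector index on the single-vector example table might look like the following; the access method name ann and the dim value 768 are assumptions to be replaced with the values from your environment.

# Illustration only: rebuild a vector index after the import. The access
# method "ann" and the option dim='768' are assumptions; see the
# "Create vector indexes" topic for the authoritative syntax.
import psycopg2

conn = psycopg2.connect(host="192.16.XX.XX", port=5432,
                        database="vector_database",
                        user="username", password="")
conn.autocommit = True
with conn.cursor() as cur:
    cur.execute("CREATE INDEX ON test_collection USING ann (vector) WITH (dim='768');")
conn.close()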
References
For more information about Qdrant, see the Qdrant product documentation.